From 830c460dc6f83acfe7e42ff4dd02304815a11b4f Mon Sep 17 00:00:00 2001
From: Keir Fraser
Date: Wed, 14 May 2008 09:52:25 +0100
Subject: [PATCH] Fix XEN_SYSCTL_physinfo to handle NUMA info properly.

Signed-off-by: Andre Przywara
Signed-off-by: Keir Fraser
---
 xen/arch/ia64/xen/dom0_ops.c | 10 +++++++---
 xen/arch/x86/sysctl.c        | 19 ++++++++++++++-----
 2 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/xen/arch/ia64/xen/dom0_ops.c b/xen/arch/ia64/xen/dom0_ops.c
index af4e6b555f..4d00c68583 100644
--- a/xen/arch/ia64/xen/dom0_ops.c
+++ b/xen/arch/ia64/xen/dom0_ops.c
@@ -407,10 +407,15 @@ long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
     {
         int i;
         uint32_t max_array_ent;
+        XEN_GUEST_HANDLE_64(uint32) cpu_to_node_arr;
         xen_sysctl_physinfo_t *pi = &op->u.physinfo;
 
+        max_array_ent = pi->max_cpu_id;
+        cpu_to_node_arr = pi->cpu_to_node;
+
         memset(pi, 0, sizeof(*pi));
+        pi->cpu_to_node = cpu_to_node_arr;
         pi->threads_per_core = cpus_weight(cpu_sibling_map[0]);
         pi->cores_per_socket =
             cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
 
@@ -421,16 +426,15 @@ long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
         pi->scrub_pages = avail_scrub_pages();
         pi->cpu_khz = local_cpu_data->proc_freq / 1000;
 
-        max_array_ent = pi->max_cpu_id;
         pi->max_cpu_id = last_cpu(cpu_online_map);
         max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);
 
         ret = 0;
 
-        if (!guest_handle_is_null(pi->cpu_to_node)) {
+        if (!guest_handle_is_null(cpu_to_node_arr)) {
             for (i = 0; i <= max_array_ent; i++) {
                 uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
-                if (copy_to_guest_offset(pi->cpu_to_node, i, &node, 1)) {
+                if (copy_to_guest_offset(cpu_to_node_arr, i, &node, 1)) {
                     ret = -EFAULT;
                     break;
                 }
diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
index 13f177a7fa..2fc125d4b9 100644
--- a/xen/arch/x86/sysctl.c
+++ b/xen/arch/x86/sysctl.c
@@ -40,6 +40,7 @@ long arch_do_sysctl(
     case XEN_SYSCTL_physinfo:
     {
         uint32_t i, max_array_ent;
+        XEN_GUEST_HANDLE_64(uint32) cpu_to_node_arr;
 
         xen_sysctl_physinfo_t *pi = &sysctl->u.physinfo;
 
@@ -47,7 +48,11 @@ long arch_do_sysctl(
         if ( ret )
             break;
 
+        max_array_ent = pi->max_cpu_id;
+        cpu_to_node_arr = pi->cpu_to_node;
+
         memset(pi, 0, sizeof(*pi));
+        pi->cpu_to_node = cpu_to_node_arr;
         pi->threads_per_core = cpus_weight(cpu_sibling_map[0]);
         pi->cores_per_socket =
 
@@ -64,22 +69,26 @@ long arch_do_sysctl(
         if ( iommu_enabled )
             pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;
 
-        max_array_ent = pi->max_cpu_id;
         pi->max_cpu_id = last_cpu(cpu_online_map);
         max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);
 
-        ret = -EFAULT;
-        if ( !guest_handle_is_null(pi->cpu_to_node) )
+        ret = 0;
+
+        if ( !guest_handle_is_null(cpu_to_node_arr) )
         {
             for ( i = 0; i <= max_array_ent; i++ )
             {
                 uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
-                if ( copy_to_guest_offset(pi->cpu_to_node, i, &node, 1) )
+                if ( copy_to_guest_offset(cpu_to_node_arr, i, &node, 1) )
+                {
+                    ret = -EFAULT;
                     break;
+                }
             }
         }
 
-        ret = copy_to_guest(u_sysctl, sysctl, 1) ? -EFAULT : 0;
+        if ( copy_to_guest(u_sysctl, sysctl, 1) )
+            ret = -EFAULT;
     }
     break;
-- 
2.30.2